"env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"flate2 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fs2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"git2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"git2-curl 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
"miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "fs2"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "gcc"
version = "0.3.25"
env_logger = "0.3"
filetime = "0.1"
flate2 = "0.2"
+fs2 = "0.2"
git2 = "0.4"
git2-curl = "0.4"
glob = "0.2"
}
fn search_directories(config: &Config) -> Vec<PathBuf> {
- let mut dirs = vec![config.home().join("bin")];
+ let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")];
if let Some(val) = env::var_os("PATH") {
dirs.extend(env::split_paths(&val));
}
extern crate docopt;
extern crate filetime;
extern crate flate2;
+extern crate fs2;
extern crate git2;
extern crate glob;
extern crate libc;
use std::ffi::OsString;
use std::fs::{self, File};
use std::io::prelude::*;
-use std::io;
+use std::io::SeekFrom;
use std::path::{Path, PathBuf};
use toml;
use ops::{self, CompileFilter};
use sources::{GitSource, PathSource, RegistrySource};
use util::{CargoResult, ChainError, Config, human, internal};
+use util::{Filesystem, FileLock};
#[derive(RustcDecodable, RustcEncodable)]
enum CrateListing {
V1(CrateListingV1),
+ Empty,
}
#[derive(RustcDecodable, RustcEncodable)]
specify alternate source"))))
};
- let mut list = try!(read_crate_list(&root));
- let dst = root.join("bin");
- try!(check_overwrites(&dst, &pkg, &opts.filter, &list));
+ // Preflight checks to check up front whether we'll overwrite something.
+ // We have to check this again afterwards, but may as well avoid building
+ // anything if we're gonna throw it away anyway.
+ {
+ let metadata = try!(metadata(config, &root));
+ let list = try!(read_crate_list(metadata.file()));
+ let dst = metadata.parent().join("bin");
+ try!(check_overwrites(&dst, &pkg, &opts.filter, &list));
+ }
let target_dir = if source_id.is_path() {
config.target_dir(&pkg)
found at `{}`", pkg, target_dir.display()))
}));
+ let metadata = try!(metadata(config, &root));
+ let mut list = try!(read_crate_list(metadata.file()));
+ let dst = metadata.parent().join("bin");
+ try!(check_overwrites(&dst, &pkg, &opts.filter, &list));
+
let mut t = Transaction { bins: Vec::new() };
try!(fs::create_dir_all(&dst));
for bin in compile.binaries.iter() {
}).extend(t.bins.iter().map(|t| {
t.file_name().unwrap().to_string_lossy().into_owned()
}));
- try!(write_crate_list(&root, list));
+ try!(write_crate_list(metadata.file(), list));
t.bins.truncate(0);
Ok(())
}
-fn read_crate_list(path: &Path) -> CargoResult<CrateListingV1> {
- let metadata = path.join(".crates.toml");
- let mut f = match File::open(&metadata) {
- Ok(f) => f,
- Err(e) => {
- if e.kind() == io::ErrorKind::NotFound {
- return Ok(CrateListingV1 { v1: BTreeMap::new() });
- }
- return Err(e).chain_error(|| {
- human(format!("failed to open crate metadata at `{}`",
- metadata.display()))
- });
- }
- };
+fn read_crate_list(mut file: &File) -> CargoResult<CrateListingV1> {
(|| -> CargoResult<_> {
let mut contents = String::new();
- try!(f.read_to_string(&mut contents));
+ try!(file.read_to_string(&mut contents));
let listing = try!(toml::decode_str(&contents).chain_error(|| {
internal("invalid TOML found for metadata")
}));
match listing {
CrateListing::V1(v1) => Ok(v1),
+ CrateListing::Empty => {
+ Ok(CrateListingV1 { v1: BTreeMap::new() })
+ }
}
}).chain_error(|| {
- human(format!("failed to parse crate metadata at `{}`",
- metadata.display()))
+ human("failed to parse crate metadata")
})
}
-fn write_crate_list(path: &Path, listing: CrateListingV1) -> CargoResult<()> {
- let metadata = path.join(".crates.toml");
+fn write_crate_list(mut file: &File, listing: CrateListingV1) -> CargoResult<()> {
(|| -> CargoResult<_> {
- let mut f = try!(File::create(&metadata));
+ try!(file.seek(SeekFrom::Start(0)));
+ try!(file.set_len(0));
let data = toml::encode_str::<CrateListing>(&CrateListing::V1(listing));
- try!(f.write_all(data.as_bytes()));
+ try!(file.write_all(data.as_bytes()));
Ok(())
}).chain_error(|| {
- human(format!("failed to write crate metadata at `{}`",
- metadata.display()))
+ human("failed to write crate metadata")
})
}
pub fn install_list(dst: Option<&str>, config: &Config) -> CargoResult<()> {
let dst = try!(resolve_root(dst, config));
- let list = try!(read_crate_list(&dst));
+ let dst = try!(metadata(config, &dst));
+ let list = try!(read_crate_list(dst.file()));
let mut shell = config.shell();
let out = shell.out();
for (k, v) in list.v1.iter() {
bins: &[String],
config: &Config) -> CargoResult<()> {
let root = try!(resolve_root(root, config));
- let mut metadata = try!(read_crate_list(&root));
+ let crate_metadata = try!(metadata(config, &root));
+ let mut metadata = try!(read_crate_list(crate_metadata.file()));
let mut to_remove = Vec::new();
{
let result = try!(PackageIdSpec::query_str(spec, metadata.v1.keys()))
Entry::Occupied(e) => e,
Entry::Vacant(..) => panic!("entry not found: {}", result),
};
- let dst = root.join("bin");
+ let dst = crate_metadata.parent().join("bin");
for bin in installed.get() {
let bin = dst.join(bin);
if fs::metadata(&bin).is_err() {
installed.remove();
}
}
- try!(write_crate_list(&root, metadata));
+ try!(write_crate_list(crate_metadata.file(), metadata));
for bin in to_remove {
try!(config.shell().status("Removing", bin.display()));
try!(fs::remove_file(bin));
Ok(())
}
-fn resolve_root(flag: Option<&str>, config: &Config) -> CargoResult<PathBuf> {
+fn metadata(config: &Config, root: &Filesystem) -> CargoResult<FileLock> {
+ root.open_rw(Path::new(".crates.toml"), config, "crate metadata")
+}
+
+fn resolve_root(flag: Option<&str>,
+ config: &Config) -> CargoResult<Filesystem> {
let config_root = try!(config.get_path("install.root"));
Ok(flag.map(PathBuf::from).or_else(|| {
env::var_os("CARGO_INSTALL_ROOT").map(PathBuf::from)
}).or_else(move || {
config_root.map(|v| v.val)
- }).unwrap_or_else(|| {
- config.home().to_owned()
+ }).map(Filesystem::new).unwrap_or_else(|| {
+ config.home().clone()
}))
}
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::prelude::*;
-use std::path::{self, PathBuf};
+use std::path::{self, PathBuf, Path};
use std::sync::Arc;
use core::{Package, PackageId, PackageSet, Target, Resolve};
use core::{Profile, Profiles};
-use util::{self, CargoResult, human};
+use util::{self, CargoResult, human, Filesystem};
use util::{Config, internal, ChainError, profile, join_paths};
use self::job::{Job, Work};
layout::Layout::new(config, root, Some(&target), &dest)
});
+ // For now we don't do any more finer-grained locking on the artifact
+ // directory, so just lock the entire thing for the duration of this
+ // compile.
+ let fs = Filesystem::new(host_layout.root().to_path_buf());
+ let path = Path::new(".cargo-lock");
+ let _lock = try!(fs.open_rw(path, config, "build directory"));
+
let mut cx = try!(Context::new(resolve, packages, config,
host_layout, target_layout,
build_config, profiles));
use std::fmt::{self, Debug, Formatter};
use std::hash::{Hash, Hasher, SipHasher};
use std::mem;
-use std::path::PathBuf;
use url::{self, Url};
use core::source::{Source, SourceId};
use core::GitReference;
use core::{Package, PackageId, Summary, Registry, Dependency};
-use util::{CargoResult, Config, to_hex};
+use util::{CargoResult, Config, FileLock, to_hex};
use sources::PathSource;
use sources::git::utils::{GitRemote, GitRevision};
pub struct GitSource<'cfg> {
remote: GitRemote,
reference: GitReference,
- db_path: PathBuf,
- checkout_path: PathBuf,
source_id: SourceId,
path_source: Option<PathSource<'cfg>>,
rev: Option<GitRevision>,
+ checkout_lock: Option<FileLock>,
+ ident: String,
config: &'cfg Config,
}
config: &'cfg Config) -> GitSource<'cfg> {
assert!(source_id.is_git(), "id is not git, id={}", source_id);
- let reference = match source_id.git_reference() {
- Some(reference) => reference,
- None => panic!("Not a git source; id={}", source_id),
- };
-
let remote = GitRemote::new(source_id.url());
let ident = ident(source_id.url());
- let db_path = config.git_db_path().join(&ident);
-
- let reference_path = match *reference {
- GitReference::Branch(ref s) |
- GitReference::Tag(ref s) |
- GitReference::Rev(ref s) => s.to_string(),
- };
- let checkout_path = config.git_checkout_path()
- .join(&ident)
- .join(&reference_path);
-
let reference = match source_id.precise() {
Some(s) => GitReference::Rev(s.to_string()),
None => source_id.git_reference().unwrap().clone(),
GitSource {
remote: remote,
reference: reference,
- db_path: db_path,
- checkout_path: checkout_path,
source_id: source_id.clone(),
path_source: None,
rev: None,
+ checkout_lock: None,
+ ident: ident,
config: config,
}
}
impl<'cfg> Source for GitSource<'cfg> {
fn update(&mut self) -> CargoResult<()> {
- let actual_rev = self.remote.rev_for(&self.db_path, &self.reference);
+ // First, lock both the global database and checkout locations that
+ // we're going to use. We may be performing a fetch into these locations
+ // so we need writable access.
+ let db_lock = format!(".cargo-lock-{}", self.ident);
+ let db_lock = try!(self.config.git_db_path()
+ .open_rw(&db_lock, self.config,
+ "the git database"));
+ let db_path = db_lock.parent().join(&self.ident);
+
+ let reference_path = match self.source_id.git_reference() {
+ Some(&GitReference::Branch(ref s)) |
+ Some(&GitReference::Tag(ref s)) |
+ Some(&GitReference::Rev(ref s)) => s,
+ None => panic!("not a git source"),
+ };
+ let checkout_lock = format!(".cargo-lock-{}-{}", self.ident,
+ reference_path);
+ let checkout_lock = try!(self.config.git_checkout_path()
+ .join(&self.ident)
+ .open_rw(&checkout_lock, self.config,
+ "the git checkout"));
+ let checkout_path = checkout_lock.parent().join(reference_path);
+
+ // Resolve our reference to an actual revision, and check if the
+    // database already has that revision. If it does, we just load a
+ // database pinned at that revision, and if we don't we issue an update
+ // to try to find the revision.
+ let actual_rev = self.remote.rev_for(&db_path, &self.reference);
let should_update = actual_rev.is_err() ||
self.source_id.precise().is_none();
format!("git repository `{}`", self.remote.url())));
trace!("updating git source `{:?}`", self.remote);
- let repo = try!(self.remote.checkout(&self.db_path));
+ let repo = try!(self.remote.checkout(&db_path));
let rev = try!(repo.rev_for(&self.reference));
(repo, rev)
} else {
- (try!(self.remote.db_at(&self.db_path)), actual_rev.unwrap())
+ (try!(self.remote.db_at(&db_path)), actual_rev.unwrap())
};
- try!(repo.copy_to(actual_rev.clone(), &self.checkout_path));
+ // Copy the database to the checkout location. After this we could drop
+        // the lock on the database as we no longer need it, but we leave it
+ // in scope so the destructors here won't tamper with too much.
+ try!(repo.copy_to(actual_rev.clone(), &checkout_path));
let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));
- let path_source = PathSource::new_recursive(&self.checkout_path,
+ let path_source = PathSource::new_recursive(&checkout_path,
&source_id,
self.config);
+ // Cache the information we just learned, and crucially also cache the
+ // lock on the checkout location. We wouldn't want someone else to come
+ // swipe our checkout location to another revision while we're using it!
self.path_source = Some(path_source);
self.rev = Some(actual_rev);
+ self.checkout_lock = Some(checkout_lock);
self.path_source.as_mut().unwrap().update()
}
//! ```
use std::collections::HashMap;
-use std::fs::{self, File};
+use std::fs::File;
+use std::io::SeekFrom;
use std::io::prelude::*;
-use std::path::PathBuf;
+use std::path::{PathBuf, Path};
use curl::http;
use flate2::read::GzDecoder;
use core::dependency::{Dependency, DependencyInner, Kind};
use sources::{PathSource, git};
use util::{CargoResult, Config, internal, ChainError, ToUrl, human};
-use util::{hex, Sha256, paths};
+use util::{hex, Sha256, paths, Filesystem, FileLock};
use ops;
-static DEFAULT: &'static str = "https://github.com/rust-lang/crates.io-index";
+const DEFAULT: &'static str = "https://github.com/rust-lang/crates.io-index";
+const INDEX_LOCK: &'static str = ".cargo-index-lock";
pub struct RegistrySource<'cfg> {
source_id: SourceId,
- checkout_path: PathBuf,
- cache_path: PathBuf,
- src_path: PathBuf,
+ checkout_path: Filesystem,
+ cache_path: Filesystem,
+ src_path: Filesystem,
config: &'cfg Config,
handle: Option<http::Handle>,
hashes: HashMap<(String, String), String>, // (name, vers) => cksum
///
/// This requires that the index has been at least checked out.
pub fn config(&self) -> CargoResult<RegistryConfig> {
- let contents = try!(paths::read(&self.checkout_path.join("config.json")));
+ let lock = try!(self.checkout_path.open_ro(Path::new(INDEX_LOCK),
+ self.config,
+ "the registry index"));
+ let path = lock.path().parent().unwrap();
+ let contents = try!(paths::read(&path.join("config.json")));
let config = try!(json::decode(&contents));
Ok(config)
}
- /// Open the git repository for the index of the registry.
- ///
- /// This will attempt to open an existing checkout, and failing that it will
- /// initialize a fresh new directory and git checkout. No remotes will be
- /// configured by default.
- fn open(&self) -> CargoResult<git2::Repository> {
- match git2::Repository::open(&self.checkout_path) {
- Ok(repo) => return Ok(repo),
- Err(..) => {}
- }
-
- try!(fs::create_dir_all(&self.checkout_path));
- let _ = fs::remove_dir_all(&self.checkout_path);
- let repo = try!(git2::Repository::init(&self.checkout_path));
- Ok(repo)
- }
-
/// Download the given package from the given url into the local cache.
///
/// This will perform the HTTP request to fetch the package. This function
///
/// No action is taken if the package is already downloaded.
fn download_package(&mut self, pkg: &PackageId, url: &Url)
- -> CargoResult<PathBuf> {
- // TODO: should discover filename from the S3 redirect
+ -> CargoResult<FileLock> {
let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
- let dst = self.cache_path.join(&filename);
- if fs::metadata(&dst).is_ok() { return Ok(dst) }
+ let path = Path::new(&filename);
+ let mut dst = try!(self.cache_path.open_rw(path, self.config, &filename));
+ let meta = try!(dst.file().metadata());
+ if meta.len() > 0 {
+ return Ok(dst)
+ }
try!(self.config.shell().status("Downloading", pkg));
- try!(fs::create_dir_all(dst.parent().unwrap()));
let expected_hash = try!(self.hash(pkg));
let handle = match self.handle {
Some(ref mut handle) => handle,
bail!("failed to verify the checksum of `{}`", pkg)
}
- try!(paths::write(&dst, resp.get_body()));
+ try!(dst.write_all(resp.get_body()));
+ try!(dst.seek(SeekFrom::Start(0)));
Ok(dst)
}
/// compiled.
///
/// No action is taken if the source looks like it's already unpacked.
- fn unpack_package(&self, pkg: &PackageId, tarball: PathBuf)
+ fn unpack_package(&self,
+ pkg: &PackageId,
+ tarball: &FileLock)
-> CargoResult<PathBuf> {
let dst = self.src_path.join(&format!("{}-{}", pkg.name(),
pkg.version()));
- if fs::metadata(&dst.join(".cargo-ok")).is_ok() { return Ok(dst) }
+ try!(dst.create_dir());
+ // Note that we've already got the `tarball` locked above, and that
+ // implies a lock on the unpacked destination as well, so this access
+ // via `into_path_unlocked` should be ok.
+ let dst = dst.into_path_unlocked();
+ let ok = dst.join(".cargo-ok");
+ if ok.exists() {
+ return Ok(dst)
+ }
- try!(fs::create_dir_all(dst.parent().unwrap()));
- let f = try!(File::open(&tarball));
- let gz = try!(GzDecoder::new(f));
+ let gz = try!(GzDecoder::new(tarball.file()));
let mut tar = Archive::new(gz);
try!(tar.unpack(dst.parent().unwrap()));
- try!(File::create(&dst.join(".cargo-ok")));
+ try!(File::create(&ok));
Ok(dst)
}
if self.cache.contains_key(name) {
return Ok(self.cache.get(name).unwrap());
}
- // see module comment for why this is structured the way it is
- let path = self.checkout_path.clone();
- let fs_name = name.chars().flat_map(|c| c.to_lowercase()).collect::<String>();
- let path = match fs_name.len() {
- 1 => path.join("1").join(&fs_name),
- 2 => path.join("2").join(&fs_name),
- 3 => path.join("3").join(&fs_name[..1]).join(&fs_name),
- _ => path.join(&fs_name[0..2])
- .join(&fs_name[2..4])
- .join(&fs_name),
- };
- let summaries = match File::open(&path) {
+ let lock = self.checkout_path.open_ro(Path::new(INDEX_LOCK),
+ self.config,
+ "the registry index");
+ let file = lock.and_then(|lock| {
+ let path = lock.path().parent().unwrap();
+ let fs_name = name.chars().flat_map(|c| {
+ c.to_lowercase()
+ }).collect::<String>();
+
+ // see module comment for why this is structured the way it is
+ let path = match fs_name.len() {
+ 1 => path.join("1").join(&fs_name),
+ 2 => path.join("2").join(&fs_name),
+ 3 => path.join("3").join(&fs_name[..1]).join(&fs_name),
+ _ => path.join(&fs_name[0..2])
+ .join(&fs_name[2..4])
+ .join(&fs_name),
+ };
+ File::open(&path).map_err(human)
+ });
+ let summaries = match file {
Ok(mut f) => {
let mut contents = String::new();
try!(f.read_to_string(&mut contents));
/// Actually perform network operations to update the registry
fn do_update(&mut self) -> CargoResult<()> {
- if self.updated { return Ok(()) }
+ if self.updated {
+ return Ok(())
+ }
+ try!(self.checkout_path.create_dir());
+ let lock = try!(self.checkout_path.open_rw(Path::new(INDEX_LOCK),
+ self.config,
+ "the registry index"));
+ let path = lock.path().parent().unwrap();
try!(self.config.shell().status("Updating",
format!("registry `{}`", self.source_id.url())));
- let repo = try!(self.open());
+ let repo = try!(git2::Repository::open(path).or_else(|_| {
+ let _ = lock.remove_siblings();
+ git2::Repository::init(path)
+ }));
// git fetch origin
let url = self.source_id.url().to_string();
url.path_mut().unwrap().push(package.name().to_string());
url.path_mut().unwrap().push(package.version().to_string());
url.path_mut().unwrap().push("download".to_string());
- let path = try!(self.download_package(package, &url).chain_error(|| {
+ let krate = try!(self.download_package(package, &url).chain_error(|| {
internal(format!("failed to download package `{}` from {}",
package, url))
}));
- let path = try!(self.unpack_package(package, path).chain_error(|| {
+ let path = try!(self.unpack_package(package, &krate).chain_error(|| {
internal(format!("failed to unpack package `{}`", package))
}));
use std::env;
use std::fmt;
use std::fs::{self, File};
+use std::io::SeekFrom;
use std::io::prelude::*;
use std::mem;
use std::path::{Path, PathBuf};
use toml;
use core::shell::{Verbosity, ColorConfig};
use core::{MultiShell, Package};
-use util::{CargoResult, CargoError, ChainError, Rustc, internal, human, paths};
+use util::{CargoResult, CargoError, ChainError, Rustc, internal, human};
+use util::Filesystem;
use util::toml as cargo_toml;
use self::ConfigValue as CV;
pub struct Config {
- home_path: PathBuf,
+ home_path: Filesystem,
shell: RefCell<MultiShell>,
rustc_info: Rustc,
values: RefCell<HashMap<String, ConfigValue>>,
cwd: PathBuf,
homedir: PathBuf) -> CargoResult<Config> {
let mut cfg = Config {
- home_path: homedir,
+ home_path: Filesystem::new(homedir),
shell: RefCell::new(shell),
rustc_info: Rustc::blank(),
cwd: cwd,
Config::new(shell, cwd, homedir)
}
- pub fn home(&self) -> &Path { &self.home_path }
+ pub fn home(&self) -> &Filesystem { &self.home_path }
- pub fn git_db_path(&self) -> PathBuf {
+ pub fn git_db_path(&self) -> Filesystem {
self.home_path.join("git").join("db")
}
- pub fn git_checkout_path(&self) -> PathBuf {
+ pub fn git_checkout_path(&self) -> Filesystem {
self.home_path.join("git").join("checkouts")
}
- pub fn registry_index_path(&self) -> PathBuf {
+ pub fn registry_index_path(&self) -> Filesystem {
self.home_path.join("registry").join("index")
}
- pub fn registry_cache_path(&self) -> PathBuf {
+ pub fn registry_cache_path(&self) -> Filesystem {
self.home_path.join("registry").join("cache")
}
- pub fn registry_source_path(&self) -> PathBuf {
+ pub fn registry_source_path(&self) -> Filesystem {
self.home_path.join("registry").join("src")
}
Ok(())
}
-pub fn set_config(cfg: &Config, loc: Location, key: &str,
+pub fn set_config(cfg: &Config,
+ loc: Location,
+ key: &str,
value: ConfigValue) -> CargoResult<()> {
// TODO: There are a number of drawbacks here
//
// 1. Project is unimplemented
// 2. This blows away all comments in a file
// 3. This blows away the previous ordering of a file.
- let file = match loc {
- Location::Global => cfg.home_path.join("config"),
+ let mut file = match loc {
+ Location::Global => {
+ try!(cfg.home_path.create_dir());
+ try!(cfg.home_path.open_rw(Path::new("config"), cfg,
+ "the global config file"))
+ }
Location::Project => unimplemented!(),
};
- try!(fs::create_dir_all(file.parent().unwrap()));
- let contents = paths::read(&file).unwrap_or(String::new());
- let mut toml = try!(cargo_toml::parse(&contents, &file));
+ let mut contents = String::new();
+ let _ = file.read_to_string(&mut contents);
+ let mut toml = try!(cargo_toml::parse(&contents, file.path()));
toml.insert(key.to_string(), value.into_toml());
let contents = toml::Value::Table(toml).to_string();
- try!(paths::write(&file, contents.as_bytes()));
+ try!(file.seek(SeekFrom::Start(0)));
+ try!(file.write_all(contents.as_bytes()));
Ok(())
}
--- /dev/null
+use std::fs::{self, File, OpenOptions};
+use std::io::*;
+use std::io;
+use std::path::{Path, PathBuf};
+
+use term::color::CYAN;
+use fs2::{FileExt, lock_contended_error};
+
+use util::{CargoResult, ChainError, Config, human};
+
+/// A handle to a file that carries an advisory filesystem lock.
+///
+/// The lock (shared or exclusive, recorded in `state`) is released when this
+/// value is dropped.
+pub struct FileLock {
+    // The open file handle; `Option` so `Drop` can move it out to unlock.
+    f: Option<File>,
+    // Path the file was opened at (the `Filesystem` root joined with the
+    // relative path passed to `open_rw`/`open_ro`).
+    path: PathBuf,
+    // Which kind of lock was acquired; `path()`/`parent()` assert on this.
+    state: State,
+}
+
+/// Which kind of advisory lock (if any) is held on the underlying file.
+/// Used to gate the `path()`/`parent()` accessors and to decide whether
+/// `Drop` must unlock.
+#[derive(PartialEq)]
+enum State {
+    /// No lock is held; the path accessors must not be used.
+    Unlocked,
+    /// A shared (read) lock is held.
+    Shared,
+    /// An exclusive (write) lock is held.
+    Exclusive,
+}
+
+impl FileLock {
+    /// Returns the underlying file handle of this lock.
+    pub fn file(&self) -> &File {
+        self.f.as_ref().unwrap()
+    }
+
+    /// Returns the underlying path that this lock points to.
+    ///
+    /// Note that special care must be taken to ensure that the path is not
+    /// referenced outside the lifetime of this lock.
+    pub fn path(&self) -> &Path {
+        assert!(self.state != State::Unlocked);
+        &self.path
+    }
+
+    /// Returns the parent path containing this file
+    pub fn parent(&self) -> &Path {
+        assert!(self.state != State::Unlocked);
+        self.path.parent().unwrap()
+    }
+
+    /// Removes all sibling files to this locked file.
+    ///
+    /// This can be useful if a directory is locked with a sentinel file but it
+    /// needs to be cleared out as it may be corrupt.
+    pub fn remove_siblings(&self) -> io::Result<()> {
+        let path = self.path();
+        for entry in try!(path.parent().unwrap().read_dir()) {
+            let entry = try!(entry);
+            // Skip the lock file itself; every other entry — file or
+            // directory — is deleted.
+            if Some(&entry.file_name()[..]) == path.file_name() {
+                continue
+            }
+            let kind = try!(entry.file_type());
+            if kind.is_dir() {
+                try!(fs::remove_dir_all(entry.path()));
+            } else {
+                try!(fs::remove_file(entry.path()));
+            }
+        }
+        Ok(())
+    }
+}
+
+// I/O trait forwarding: a `FileLock` reads/seeks/writes through its locked
+// `File`. The immutable borrow from `file()` suffices because the standard
+// library implements these traits for `&File`.
+impl Read for FileLock {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.file().read(buf)
+    }
+}
+
+impl Seek for FileLock {
+    fn seek(&mut self, to: SeekFrom) -> io::Result<u64> {
+        self.file().seek(to)
+    }
+}
+
+impl Write for FileLock {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.file().write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        self.file().flush()
+    }
+}
+
+impl Drop for FileLock {
+    fn drop(&mut self) {
+        // Release the advisory lock when the handle goes away. Unlock errors
+        // are deliberately ignored — there is no good way to report them from
+        // a destructor.
+        if self.state != State::Unlocked {
+            if let Some(f) = self.f.take() {
+                let _ = f.unlock();
+            }
+        }
+    }
+}
+
+/// A "filesystem" is intended to be a globally shared, hence locked, resource
+/// in Cargo.
+///
+/// The `Path` of a filesystem cannot be learned unless it's done in a locked
+/// fashion, and otherwise functions on this structure are prepared to handle
+/// concurrent invocations across multiple instances of Cargo.
+#[derive(Clone)]
+pub struct Filesystem {
+ root: PathBuf,
+}
+
+impl Filesystem {
+ /// Creates a new filesystem to be rooted at the given path.
+ pub fn new(path: PathBuf) -> Filesystem {
+ Filesystem { root: path }
+ }
+
+ /// Like `Path::join`, creates a new filesystem rooted at this filesystem
+ /// joined with the given path.
+ pub fn join<T: AsRef<Path>>(&self, other: T) -> Filesystem {
+ Filesystem::new(self.root.join(other))
+ }
+
+ /// Consumes this filesystem and returns the underlying `PathBuf`.
+ ///
+ /// Note that this is a relatively dangerous operation and should be used
+    /// with great caution!
+ pub fn into_path_unlocked(self) -> PathBuf {
+ self.root
+ }
+
+ /// Creates the directory pointed to by this filesystem.
+ ///
+ /// Handles errors where other Cargo processes are also attempting to
+ /// concurrently create this directory.
+ pub fn create_dir(&self) -> io::Result<()> {
+ return create_dir_all(&self.root);
+ }
+
+ /// Opens exclusive access to a file, returning the locked version of a
+ /// file.
+ ///
+ /// This function will create a file at `path` if it doesn't already exist
+ /// (including intermediate directories), and then it will acquire an
+ /// exclusive lock on `path`. If the process must block waiting for the
+ /// lock, the `msg` is printed to `config`.
+ ///
+ /// The returned file can be accessed to look at the path and also has
+ /// read/write access to the underlying file.
+ pub fn open_rw<P>(&self,
+ path: P,
+ config: &Config,
+ msg: &str) -> CargoResult<FileLock>
+ where P: AsRef<Path>
+ {
+ self.open(path.as_ref(),
+ OpenOptions::new().read(true).write(true).create(true),
+ State::Exclusive,
+ config,
+ msg)
+ }
+
+ /// Opens shared access to a file, returning the locked version of a file.
+ ///
+ /// This function will fail if `path` doesn't already exist, but if it does
+ /// then it will acquire a shared lock on `path`. If the process must block
+ /// waiting for the lock, the `msg` is printed to `config`.
+ ///
+ /// The returned file can be accessed to look at the path and also has read
+ /// access to the underlying file. Any writes to the file will return an
+ /// error.
+ pub fn open_ro<P>(&self,
+ path: P,
+ config: &Config,
+ msg: &str) -> CargoResult<FileLock>
+ where P: AsRef<Path>
+ {
+ self.open(path.as_ref(),
+ OpenOptions::new().read(true),
+ State::Shared,
+ config,
+ msg)
+ }
+
+ fn open(&self,
+ path: &Path,
+ opts: &OpenOptions,
+ state: State,
+ config: &Config,
+ msg: &str) -> CargoResult<FileLock> {
+ let path = self.root.join(path);
+
+        // If we want an exclusive lock and the open failed with NotFound, it's
+        // likely because an intermediate directory didn't exist, so try to
+        // create the directory and then retry the open.
+ let f = try!(opts.open(&path).or_else(|e| {
+ if e.kind() == io::ErrorKind::NotFound && state == State::Exclusive {
+ try!(create_dir_all(path.parent().unwrap()));
+ opts.open(&path)
+ } else {
+ Err(e)
+ }
+ }).chain_error(|| {
+ human(format!("failed to open: {}", path.display()))
+ }));
+ match state {
+ State::Exclusive => {
+ try!(acquire(config, msg, &path,
+ &|| f.try_lock_exclusive(),
+ &|| f.lock_exclusive()));
+ }
+ State::Shared => {
+ try!(acquire(config, msg, &path,
+ &|| f.try_lock_shared(),
+ &|| f.lock_shared()));
+ }
+ State::Unlocked => {}
+
+ }
+ Ok(FileLock { f: Some(f), path: path, state: state })
+ }
+}
+
+/// Acquires a lock on a file in a "nice" manner.
+///
+/// Almost all long-running blocking actions in Cargo have a status message
+/// associated with them as we're not sure how long they'll take. Whenever a
+/// conflicted file lock happens, this is the case (we're not sure when the lock
+/// will be released).
+///
+/// This function will acquire the lock on a `path`, printing out a nice message
+/// to the console if we have to wait for it. It will first attempt to use `try`
+/// to acquire a lock on the crate, and in the case of contention it will emit a
+/// status message based on `msg` to `config`'s shell, and then use `block` to
+/// block waiting to acquire a lock.
+///
+/// Returns an error if the lock could not be acquired or if any error other
+/// than a contention error happens.
+fn acquire(config: &Config,
+ msg: &str,
+ path: &Path,
+ try: &Fn() -> io::Result<()>,
+ block: &Fn() -> io::Result<()>) -> CargoResult<()> {
+ match try() {
+ Ok(()) => return Ok(()),
+ Err(e) => {
+ if e.raw_os_error() != lock_contended_error().raw_os_error() {
+ return Err(human(e)).chain_error(|| {
+ human(format!("failed to lock file: {}", path.display()))
+ })
+ }
+ }
+ }
+ let msg = format!("waiting for file lock on {}", msg);
+ try!(config.shell().err().say_status("Blocking", &msg, CYAN));
+
+ block().chain_error(|| {
+ human(format!("failed to lock file: {}", path.display()))
+ })
+}
+
+/// Recursively creates `path` and any missing parent directories.
+///
+/// Every level goes through `create_dir` below, which treats `AlreadyExists`
+/// as success, so concurrent Cargo processes racing to create the same
+/// directory tree cannot fail one another.
+fn create_dir_all(path: &Path) -> io::Result<()> {
+    match create_dir(path) {
+        Ok(()) => return Ok(()),
+        Err(e) => {
+            // NotFound means a parent is missing: create the parent chain
+            // first, then retry this level.
+            if e.kind() == io::ErrorKind::NotFound {
+                if let Some(p) = path.parent() {
+                    return create_dir_all(p).and_then(|()| create_dir(path))
+                }
+            }
+            Err(e)
+        }
+    }
+}
+
+/// Creates a single directory, mapping `AlreadyExists` to success so that a
+/// concurrent creation of the same directory by another process is harmless.
+fn create_dir(path: &Path) -> io::Result<()> {
+    match fs::create_dir(path) {
+        Ok(()) => Ok(()),
+        Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()),
+        Err(e) => Err(e),
+    }
+}
pub use self::errors::{CliError, ProcessError, CargoTestError};
pub use self::errors::{Human, caused_human};
pub use self::errors::{process_error, internal_error, internal, human};
+pub use self::flock::{FileLock, Filesystem};
pub use self::graph::Graph;
pub use self::hex::{to_hex, short_hash, hash_u64};
pub use self::lazy_cell::LazyCell;
mod shell_escape;
mod vcs;
mod lazy_cell;
+mod flock;
&repo.find_tree(tree_id).unwrap(),
&parents).unwrap()
}
+
+/// Test-support helper: creates an annotated tag named `name` pointing at the
+/// repository's current HEAD commit. Panics on any git error (test-only code).
+pub fn tag(repo: &git2::Repository, name: &str) {
+    let head = repo.head().unwrap().target().unwrap();
+    repo.tag(name,
+             &repo.find_object(head, None).unwrap(),
+             &repo.signature().unwrap(),
+             "make a new tag",
+             false).unwrap();
+}
}
}
+// Lets tests assert directly on a `std::process::Output` (e.g. the result of
+// `Child::wait_with_output`) by delegating to the existing output matcher.
+impl ham::Matcher<Output> for Execs {
+    fn matches(&self, output: Output) -> ham::MatchResult {
+        self.match_output(&output)
+    }
+}
+
pub fn execs() -> Execs {
Execs {
expect_stdout: None,
--- /dev/null
+use std::env;
+use std::fs::{self, File};
+use std::io::Write;
+use std::net::TcpListener;
+use std::process::Stdio;
+use std::thread;
+
+use git2;
+use hamcrest::{assert_that, existing_file};
+
+use support::{execs, project, ERROR};
+use support::git;
+use support::registry::Package;
+use test_cargo_install::{cargo_home, has_installed_exe};
+
+fn setup() {}
+
+test!(multiple_installs {
+    // Run two `cargo install` invocations concurrently (one per package) and
+    // require that both exit successfully and both binaries end up installed
+    // in CARGO_HOME — concurrent installs must not clobber each other.
+    let p = project("foo")
+        .file("a/Cargo.toml", r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+        "#)
+        .file("a/src/main.rs", "fn main() {}")
+        .file("b/Cargo.toml", r#"
+            [package]
+            name = "bar"
+            authors = []
+            version = "0.0.0"
+        "#)
+        .file("b/src/main.rs", "fn main() {}");
+    p.build();
+
+    let mut a = p.cargo("install").cwd(p.root().join("a")).build_command();
+    let mut b = p.cargo("install").cwd(p.root().join("b")).build_command();
+
+    a.stdout(Stdio::piped()).stderr(Stdio::piped());
+    b.stdout(Stdio::piped()).stderr(Stdio::piped());
+
+    // Wait for `a` on a helper thread so both children run to completion
+    // concurrently.
+    let a = a.spawn().unwrap();
+    let b = b.spawn().unwrap();
+    let a = thread::spawn(move || a.wait_with_output().unwrap());
+    let b = b.wait_with_output().unwrap();
+    let a = a.join().unwrap();
+
+    assert_that(a, execs().with_status(0));
+    assert_that(b, execs().with_status(0));
+
+    assert_that(cargo_home(), has_installed_exe("foo"));
+    assert_that(cargo_home(), has_installed_exe("bar"));
+});
+
+// Two concurrent installs of binaries that are both named `foo`: exactly one
+// must win the race; the loser reports that the binary already exists.
+test!(one_install_should_be_bad {
+    let p = project("foo")
+        .file("a/Cargo.toml", r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+        "#)
+        .file("a/src/main.rs", "fn main() {}")
+        .file("b/Cargo.toml", r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+        "#)
+        .file("b/src/main.rs", "fn main() {}");
+    p.build();
+
+    let mut a = p.cargo("install").cwd(p.root().join("a")).build_command();
+    let mut b = p.cargo("install").cwd(p.root().join("b")).build_command();
+
+    a.stdout(Stdio::piped()).stderr(Stdio::piped());
+    b.stdout(Stdio::piped()).stderr(Stdio::piped());
+
+    let a = a.spawn().unwrap();
+    let b = b.spawn().unwrap();
+    let a = thread::spawn(move || a.wait_with_output().unwrap());
+    let b = b.wait_with_output().unwrap();
+    let a = a.join().unwrap();
+
+    // We can't know which process lost the race, so classify by exit status
+    // (the failing cargo exits with 101).
+    let (bad, good) = if a.status.code() == Some(101) {(a, b)} else {(b, a)};
+    assert_that(bad, execs().with_status(101).with_stderr_contains(&format!("\
+{error} binary `foo[..]` already exists in destination as part of `[..]`
+", error = ERROR)));
+    assert_that(good, execs().with_status(0).with_stderr("\
+be sure to add `[..]` to your PATH [..]
+"));
+
+    assert_that(cargo_home(), has_installed_exe("foo"));
+});
+
+// Two concurrent builds that both need to fetch the same registry packages
+// must not corrupt the shared registry index/cache; both should succeed.
+test!(multiple_registry_fetches {
+    // Publish `bar` with ten dependencies so the fetch phase is long enough
+    // for the two cargos to actually contend.
+    let mut pkg = Package::new("bar", "1.0.2");
+    for i in 0..10 {
+        let name = format!("foo{}", i);
+        Package::new(&name, "1.0.0").publish();
+        pkg.dep(&name, "*");
+    }
+    pkg.publish();
+
+    let p = project("foo")
+        .file("a/Cargo.toml", r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+
+            [dependencies]
+            bar = "*"
+        "#)
+        .file("a/src/main.rs", "fn main() {}")
+        .file("b/Cargo.toml", r#"
+            [package]
+            name = "bar"
+            authors = []
+            version = "0.0.0"
+
+            [dependencies]
+            bar = "*"
+        "#)
+        .file("b/src/main.rs", "fn main() {}");
+    p.build();
+
+    let mut a = p.cargo("build").cwd(p.root().join("a")).build_command();
+    let mut b = p.cargo("build").cwd(p.root().join("b")).build_command();
+
+    a.stdout(Stdio::piped()).stderr(Stdio::piped());
+    b.stdout(Stdio::piped()).stderr(Stdio::piped());
+
+    let a = a.spawn().unwrap();
+    let b = b.spawn().unwrap();
+    let a = thread::spawn(move || a.wait_with_output().unwrap());
+    let b = b.wait_with_output().unwrap();
+    let a = a.join().unwrap();
+
+    assert_that(a, execs().with_status(0));
+    assert_that(b, execs().with_status(0));
+
+    // Verify both binaries were actually produced, not just a clean exit.
+    let suffix = env::consts::EXE_SUFFIX;
+    assert_that(&p.root().join("a/target/debug").join(format!("foo{}", suffix)),
+                existing_file());
+    assert_that(&p.root().join("b/target/debug").join(format!("bar{}", suffix)),
+                existing_file());
+});
+
+// Two concurrent builds depending on *different tags* of the same git repo:
+// they share the cloned git database and must not trip over each other.
+test!(git_same_repo_different_tags {
+    let a = git::new("dep", |project| {
+        project.file("Cargo.toml", r#"
+            [project]
+            name = "dep"
+            version = "0.5.0"
+            authors = []
+        "#).file("src/lib.rs", "pub fn tag1() {}")
+    }).unwrap();
+
+    // `tag1` marks the initial commit; `tag2` marks a follow-up commit with a
+    // different API, so each consumer compiles only against its own tag.
+    let repo = git2::Repository::open(&a.root()).unwrap();
+    git::tag(&repo, "tag1");
+
+    File::create(a.root().join("src/lib.rs")).unwrap()
+        .write_all(b"pub fn tag2() {}").unwrap();
+    git::add(&repo);
+    git::commit(&repo);
+    git::tag(&repo, "tag2");
+
+    let p = project("foo")
+        .file("a/Cargo.toml", &format!(r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+
+            [dependencies]
+            dep = {{ git = '{}', tag = 'tag1' }}
+        "#, a.url()))
+        .file("a/src/main.rs", "extern crate dep; fn main() {{ dep::tag1(); }}")
+        .file("b/Cargo.toml", &format!(r#"
+            [package]
+            name = "bar"
+            authors = []
+            version = "0.0.0"
+
+            [dependencies]
+            dep = {{ git = '{}', tag = 'tag2' }}
+        "#, a.url()))
+        .file("b/src/main.rs", "extern crate dep; fn main() {{ dep::tag2(); }}");
+    p.build();
+
+    let mut a = p.cargo("build").arg("-v").cwd(p.root().join("a")).build_command();
+    let mut b = p.cargo("build").arg("-v").cwd(p.root().join("b")).build_command();
+
+    a.stdout(Stdio::piped()).stderr(Stdio::piped());
+    b.stdout(Stdio::piped()).stderr(Stdio::piped());
+
+    let a = a.spawn().unwrap();
+    let b = b.spawn().unwrap();
+    let a = thread::spawn(move || a.wait_with_output().unwrap());
+    let b = b.wait_with_output().unwrap();
+    let a = a.join().unwrap();
+
+    assert_that(a, execs().with_status(0));
+    assert_that(b, execs().with_status(0));
+});
+
+// Two concurrent builds off the same git branch: `a` is pinned to an older
+// revision by its Cargo.lock while `b` resolves to the newest commit.
+test!(git_same_branch_different_revs {
+    let a = git::new("dep", |project| {
+        project.file("Cargo.toml", r#"
+            [project]
+            name = "dep"
+            version = "0.5.0"
+            authors = []
+        "#).file("src/lib.rs", "pub fn f1() {}")
+    }).unwrap();
+
+    let p = project("foo")
+        .file("a/Cargo.toml", &format!(r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+
+            [dependencies]
+            dep = {{ git = '{}' }}
+        "#, a.url()))
+        .file("a/src/main.rs", "extern crate dep; fn main() {{ dep::f1(); }}")
+        .file("b/Cargo.toml", &format!(r#"
+            [package]
+            name = "bar"
+            authors = []
+            version = "0.0.0"
+
+            [dependencies]
+            dep = {{ git = '{}' }}
+        "#, a.url()))
+        .file("b/src/main.rs", "extern crate dep; fn main() {{ dep::f2(); }}");
+    p.build();
+
+    // Generate a Cargo.lock pointing at the current rev, then clear out the
+    // target directory
+    assert_that(p.cargo("build").cwd(p.root().join("a")),
+                execs().with_status(0));
+    fs::remove_dir_all(p.root().join("a/target")).unwrap();
+
+    // Make a new commit on the master branch
+    let repo = git2::Repository::open(&a.root()).unwrap();
+    File::create(a.root().join("src/lib.rs")).unwrap()
+        .write_all(b"pub fn f2() {}").unwrap();
+    git::add(&repo);
+    git::commit(&repo);
+
+    // Now run both builds in parallel. The build of `b` should pick up the
+    // newest commit while the build of `a` should use the locked old commit.
+    let mut a = p.cargo("build").cwd(p.root().join("a")).build_command();
+    let mut b = p.cargo("build").cwd(p.root().join("b")).build_command();
+
+    a.stdout(Stdio::piped()).stderr(Stdio::piped());
+    b.stdout(Stdio::piped()).stderr(Stdio::piped());
+
+    let a = a.spawn().unwrap();
+    let b = b.spawn().unwrap();
+    let a = thread::spawn(move || a.wait_with_output().unwrap());
+    let b = b.wait_with_output().unwrap();
+    let a = a.join().unwrap();
+
+    assert_that(a, execs().with_status(0));
+    assert_that(b, execs().with_status(0));
+});
+
+// Two concurrent builds of the *same* project in the same target directory;
+// both must exit successfully (presumably serialized by the new file lock —
+// the lock itself isn't visible from this test).
+test!(same_project {
+    let p = project("foo")
+        .file("Cargo.toml", r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+        "#)
+        .file("src/main.rs", "fn main() {}")
+        .file("src/lib.rs", "");
+    p.build();
+
+    let mut a = p.cargo("build").build_command();
+    let mut b = p.cargo("build").build_command();
+
+    a.stdout(Stdio::piped()).stderr(Stdio::piped());
+    b.stdout(Stdio::piped()).stderr(Stdio::piped());
+
+    let a = a.spawn().unwrap();
+    let b = b.spawn().unwrap();
+    let a = thread::spawn(move || a.wait_with_output().unwrap());
+    let b = b.wait_with_output().unwrap();
+    let a = a.join().unwrap();
+
+    assert_that(a, execs().with_status(0));
+    assert_that(b, execs().with_status(0));
+});
+
+// Make sure that if Cargo dies while holding a lock that it's released and the
+// next Cargo to come in will take over cleanly.
+test!(killing_cargo_releases_the_lock {
+    let p = project("foo")
+        .file("Cargo.toml", r#"
+            [package]
+            name = "foo"
+            authors = []
+            version = "0.0.0"
+            build = "build.rs"
+        "#)
+        .file("src/main.rs", "fn main() {}")
+        .file("build.rs", r#"
+            use std::net::TcpStream;
+
+            fn main() {
+                if std::env::var("A").is_ok() {
+                    TcpStream::connect(&std::env::var("ADDR").unwrap()[..])
+                              .unwrap();
+                    std::thread::sleep(std::time::Duration::new(10, 0));
+                }
+            }
+        "#);
+    p.build();
+
+    // Our build script will connect to our local TCP socket to inform us that
+    // it's started running, and that's how we know that `a` will have the lock
+    // when we kill it.
+    let l = TcpListener::bind("127.0.0.1:0").unwrap();
+    let mut a = p.cargo("build").build_command();
+    let mut b = p.cargo("build").build_command();
+    a.stdout(Stdio::piped()).stderr(Stdio::piped());
+    b.stdout(Stdio::piped()).stderr(Stdio::piped());
+    // Only `a` gets the `A` env var, so only `a`'s build script connects to
+    // the socket and then sleeps; `b` builds normally.
+    a.env("ADDR", l.local_addr().unwrap().to_string()).env("A", "a");
+    b.env("ADDR", l.local_addr().unwrap().to_string()).env_remove("A");
+
+    // Spawn `a`, wait for it to get to the build script (at which point the
+    // lock is held), then kill it.
+    let mut a = a.spawn().unwrap();
+    l.accept().unwrap();
+    a.kill().unwrap();
+
+    // Spawn `b`, then just finish the output of a/b the same way the above
+    // tests does.
+    let b = b.spawn().unwrap();
+    let a = thread::spawn(move || a.wait_with_output().unwrap());
+    let b = b.wait_with_output().unwrap();
+    let a = a.join().unwrap();
+
+    // We killed `a`, so it shouldn't succeed, but `b` should have succeeded.
+    assert!(!a.status.success());
+    assert_that(b, execs().with_status(0));
+});
use support::registry::Package;
use support::git;
-use self::InstalledExe as has_installed_exe;
+pub use self::InstalledExe as has_installed_exe;
fn setup() {
}
if cfg!(windows) {format!("{}.exe", name)} else {name.to_string()}
}
-fn cargo_home() -> PathBuf {
+/// Path to the test CARGO_HOME (`$HOME/.cargo`). Made `pub` so other test
+/// modules (e.g. the new concurrency tests) can reuse it.
+pub fn cargo_home() -> PathBuf {
 paths::home().join(".cargo")
 }
-struct InstalledExe(&'static str);
+// Struct and its field made `pub` so the matcher can be constructed from
+// other test modules via the re-exported `has_installed_exe` alias.
+pub struct InstalledExe(pub &'static str);
impl<P: AsRef<Path>> Matcher<P> for InstalledExe {
fn matches(&self, path: P) -> MatchResult {
mod test_cargo_read_manifest;
mod test_cargo_registry;
mod test_cargo_run;
+mod test_cargo_concurrent;
mod test_cargo_rustc;
mod test_cargo_rustdoc;
mod test_cargo_search;